From 253492576c7589dd8ff761d0b4383f6e42bb1ecb Mon Sep 17 00:00:00 2001
From: "kaf24@firebug.cl.cam.ac.uk" <kaf24@firebug.cl.cam.ac.uk>
Date: Tue, 17 May 2005 09:14:58 +0000
Subject: [PATCH] bitkeeper revision 1.1427 (4289b612hOngj49yfggKcA17ckis2g)

Xen saves the upcall mask onto the stack when making an upcall to the
guest. This can be used by the guest to determine whether it must
re-enable event delivery on return from the upcall activation.

Signed-off-by: Keir Fraser
---
 .../arch/xen/i386/kernel/entry.S  | 95 +++++++------------
 xen/arch/x86/traps.c              |  8 +-
 xen/arch/x86/x86_32/asm-offsets.c |  1 +
 xen/arch/x86/x86_32/entry.S       | 12 ++-
 xen/arch/x86/x86_32/seg_fixup.c   |  4 +-
 xen/arch/x86/x86_64/asm-offsets.c |  1 +
 xen/arch/x86/x86_64/entry.S       | 16 ++--
 xen/arch/x86/x86_64/traps.c       |  2 +-
 xen/include/public/arch-x86_32.h  | 14 +--
 xen/include/public/arch-x86_64.h  | 34 +++----
 10 files changed, 85 insertions(+), 102 deletions(-)

diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S
index ad7b3f44ad..897759b1a6 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S
@@ -83,42 +83,28 @@ VM_MASK = 0x00020000
 #define sizeof_vcpu_shift 3
 
 #ifdef CONFIG_SMP
-#define XEN_GET_VCPU_INFO(reg)
 #define preempt_disable(reg) incl TI_preempt_count(reg)
 #define preempt_enable(reg)  decl TI_preempt_count(reg)
-#define XEN_LOCK_VCPU_INFO_SMP(reg) preempt_disable(%ebp) ; \
+#define XEN_GET_VCPU_INFO(reg) preempt_disable(%ebp) ; \
                                movl TI_cpu(%ebp),reg ; \
                                shl  $sizeof_vcpu_shift,reg ; \
                                addl HYPERVISOR_shared_info,reg
-#define XEN_UNLOCK_VCPU_INFO_SMP(reg) preempt_enable(%ebp)
-#define XEN_UNLOCK_VCPU_INFO_SMP_fixup .byte 0xff,0xff,0xff
-#define Ux00 0xff
-#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
-#define XEN_BLOCK_EVENTS(reg) XEN_LOCK_VCPU_INFO_SMP(reg) ; \
-                              XEN_LOCKED_BLOCK_EVENTS(reg) ; \
-                              XEN_UNLOCK_VCPU_INFO_SMP(reg)
-#define XEN_UNBLOCK_EVENTS(reg) XEN_LOCK_VCPU_INFO_SMP(reg) ; \
-                              movb $0,evtchn_upcall_mask(reg) ; \
-                              XEN_UNLOCK_VCPU_INFO_SMP(reg)
-#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) GET_THREAD_INFO(%ebp) ; \
-                              XEN_LOCK_VCPU_INFO_SMP(reg) ; \
-                              movb evtchn_upcall_mask(reg), tmp ; \
-                              movb tmp, off(%esp) ; \
-                              XEN_UNLOCK_VCPU_INFO_SMP(reg)
+#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%ebp)
+#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
 #else
-#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
-#define XEN_LOCK_VCPU_INFO_SMP(reg)
-#define XEN_UNLOCK_VCPU_INFO_SMP(reg)
-#define XEN_UNLOCK_VCPU_INFO_SMP_fixup
-#define Ux00 0x00
-#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
-#define XEN_BLOCK_EVENTS(reg) XEN_LOCKED_BLOCK_EVENTS(reg)
-#define XEN_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
-#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) \
-        movb evtchn_upcall_mask(reg), tmp; \
-        movb tmp, off(%esp)
+#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
+#define XEN_PUT_VCPU_INFO(reg)
+#define XEN_PUT_VCPU_INFO_fixup
 #endif
 
+#define XEN_LOCKED_BLOCK_EVENTS(reg)   movb $1,evtchn_upcall_mask(reg)
+#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
+#define XEN_BLOCK_EVENTS(reg)   XEN_GET_VCPU_INFO(reg) ; \
+                                XEN_LOCKED_BLOCK_EVENTS(reg) ; \
+                                XEN_PUT_VCPU_INFO(reg)
+#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
+                                XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
+                                XEN_PUT_VCPU_INFO(reg)
 #define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
 
 #ifdef CONFIG_PREEMPT
@@ -128,7 +114,7 @@ VM_MASK = 0x00020000
 #define resume_kernel restore_all
 #endif
 
-#define SAVE_ALL_NO_EVENTMASK \
+#define SAVE_ALL \
        cld; \
        pushl %es; \
        pushl %ds; \
@@ -141,12 +127,7 @@ VM_MASK = 0x00020000
        pushl %ebx; \
        movl $(__USER_DS), %edx; \
        movl %edx, %ds; \
-       movl %edx, %es;
-
-#define SAVE_ALL \
-       SAVE_ALL_NO_EVENTMASK; \
-       XEN_GET_VCPU_INFO(%esi); \
-       XEN_SAVE_UPCALL_MASK(%esi,%dl,EVENT_MASK)
+       movl %edx, %es
 
 #define RESTORE_INT_REGS \
        popl %ebx; \
@@ -196,7 +177,6 @@ ENTRY(ret_from_fork)
        call schedule_tail
        GET_THREAD_INFO(%ebp)
        popl %eax
-       XEN_GET_VCPU_INFO(%esi)
        jmp syscall_exit
 
 /*
@@ -217,7 +197,6 @@ ret_from_intr:
        testl $(VM_MASK | 2), %eax
        jz resume_kernel                # returning to kernel or vm86-space
 ENTRY(resume_userspace)
-       XEN_GET_VCPU_INFO(%esi)
        XEN_BLOCK_EVENTS(%esi)          # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
@@ -229,7 +208,6 @@ ENTRY(resume_userspace)
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
-       XEN_GET_VCPU_INFO(%esi)
        XEN_BLOCK_EVENTS(%esi)
        cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
        jnz restore_all
@@ -316,11 +294,11 @@ restore_all:
        jnz resume_vm86
        movb EVENT_MASK(%esp), %al
        notb %al                        # %al == ~saved_mask
-       XEN_LOCK_VCPU_INFO_SMP(%esi)
+       XEN_GET_VCPU_INFO(%esi)
        andb evtchn_upcall_mask(%esi),%al
        andb $1,%al                     # %al == mask & ~saved_mask
        jnz restore_all_enable_events   # != 0 => reenable event delivery
-       XEN_UNLOCK_VCPU_INFO_SMP(%esi)
+       XEN_PUT_VCPU_INFO(%esi)
        RESTORE_ALL
 
 resume_vm86:
@@ -470,8 +448,6 @@ error_code:
        movl %ecx, %ds
        movl %ecx, %es
        movl %esp,%eax                  # pt_regs pointer
-       XEN_GET_VCPU_INFO(%esi)
-       XEN_SAVE_UPCALL_MASK(%esi,%bl,EVENT_MASK)
        call *%edi
        jmp ret_from_exception
 
@@ -488,29 +464,27 @@ error_code:
 # activation and restart the handler using the previous one.
 ENTRY(hypervisor_callback)
        pushl %eax
-       SAVE_ALL_NO_EVENTMASK
+       SAVE_ALL
        movl EIP(%esp),%eax
        cmpl $scrit,%eax
        jb 11f
        cmpl $ecrit,%eax
        jb critical_region_fixup
-11:    XEN_GET_VCPU_INFO(%esi)
-       movb $0, EVENT_MASK(%esp)
-       push %esp
+11:    push %esp
        call evtchn_do_upcall
        add $4,%esp
        jmp ret_from_intr
 
        ALIGN
 restore_all_enable_events:
-       XEN_UNBLOCK_EVENTS(%esi)
+       XEN_LOCKED_UNBLOCK_EVENTS(%esi)
 scrit: /**** START OF CRITICAL REGION ****/
        XEN_TEST_PENDING(%esi)
        jnz 14f                         # process more events if necessary...
-       XEN_UNLOCK_VCPU_INFO_SMP(%esi)
+       XEN_PUT_VCPU_INFO(%esi)
        RESTORE_ALL
 14:    XEN_LOCKED_BLOCK_EVENTS(%esi)
-       XEN_UNLOCK_VCPU_INFO_SMP(%esi)
+       XEN_PUT_VCPU_INFO(%esi)
        jmp 11b
 ecrit: /**** END OF CRITICAL REGION ****/
 # [How we do the fixup]. We want to merge the current stack frame with the
@@ -523,15 +497,12 @@ ecrit: /**** END OF CRITICAL REGION ****/
 critical_region_fixup:
        addl $critical_fixup_table-scrit,%eax
        movzbl (%eax),%eax              # %eax contains num bytes popped
-#ifdef CONFIG_SMP
-       cmpb $0xff,%al
+       cmpb $0xff,%al                  # 0xff => vcpu_info critical region
        jne 15f
-       add $1,%al
        GET_THREAD_INFO(%ebp)
-       XEN_UNLOCK_VCPU_INFO_SMP(%esi)
-15:
-#endif
-       mov %esp,%esi
+       XEN_PUT_VCPU_INFO(%esi)         # abort vcpu_info critical region
+       xorl %eax,%eax
+15:    mov %esp,%esi
        add %eax,%esi                   # %esi points at end of src region
        mov %esp,%edi
        add $0x34,%edi                  # %edi points at end of dst region
@@ -547,9 +518,9 @@ critical_region_fixup:
        jmp 11b
 
 critical_fixup_table:
-       .byte Ux00,Ux00,Ux00            # testb $0xff,(%esi) = XEN_TEST_PENDING
-       .byte Ux00,Ux00                 # jnz 14f
-       XEN_UNLOCK_VCPU_INFO_SMP_fixup
+       .byte 0xff,0xff,0xff            # testb $0xff,(%esi) = XEN_TEST_PENDING
+       .byte 0xff,0xff                 # jnz 14f
+       XEN_PUT_VCPU_INFO_fixup
        .byte 0x00                      # pop %ebx
        .byte 0x04                      # pop %ecx
        .byte 0x08                      # pop %edx
@@ -561,8 +532,8 @@ critical_fixup_table:
        .byte 0x20                      # pop %es
        .byte 0x24,0x24,0x24            # add $4,%esp
        .byte 0x28                      # iret
-       .byte Ux00,Ux00,Ux00,Ux00       # movb $1,1(%esi)
-       XEN_UNLOCK_VCPU_INFO_SMP_fixup
+       .byte 0xff,0xff,0xff,0xff       # movb $1,1(%esi)
+       XEN_PUT_VCPU_INFO_fixup
        .byte 0x00,0x00                 # jmp 11b
 
 # Hypervisor uses this for application faults while it executes.
@@ -766,8 +737,6 @@ ENTRY(page_fault)
        movl %eax, %ds
        movl %eax, %es
        movl %esp,%eax                  /* pt_regs pointer */
-       XEN_GET_VCPU_INFO(%esi)
-       XEN_SAVE_UPCALL_MASK(%esi,%bl,EVENT_MASK)
        call do_page_fault
        jmp ret_from_exception
 
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 6e2c2ef163..8851e30b87 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -257,7 +257,7 @@ static inline int do_trap(int trapnr, char *str,
         tb->error_code = regs->error_code;
     }
     if ( TI_GET_IF(ti) )
-        ed->vcpu_info->evtchn_upcall_mask = 1;
+        tb->flags |= TBF_INTERRUPT;
     return 0;
 
 xen_fault:
@@ -322,7 +322,7 @@ asmlinkage int do_int3(struct cpu_user_regs *regs)
     tb->cs = ti->cs;
    tb->eip = ti->address;
     if ( TI_GET_IF(ti) )
-        ed->vcpu_info->evtchn_upcall_mask = 1;
+        tb->flags |= TBF_INTERRUPT;
     return 0;
 }
 
@@ -345,7 +345,7 @@ void propagate_page_fault(unsigned long addr, u16 error_code)
     tb->cs = ti->cs;
     tb->eip = ti->address;
     if ( TI_GET_IF(ti) )
-        ed->vcpu_info->evtchn_upcall_mask = 1;
+        tb->flags |= TBF_INTERRUPT;
     ed->arch.guest_cr2 = addr;
 }
 
@@ -911,7 +911,7 @@ asmlinkage int do_general_protection(struct cpu_user_regs *regs)
     tb->cs = ti->cs;
     tb->eip = ti->address;
     if ( TI_GET_IF(ti) )
-        ed->vcpu_info->evtchn_upcall_mask = 1;
+        tb->flags |= TBF_INTERRUPT;
     return 0;
 
 gp_in_kernel:
diff --git a/xen/arch/x86/x86_32/asm-offsets.c b/xen/arch/x86/x86_32/asm-offsets.c
index 7015b2a861..40c58ab7ef 100644
--- a/xen/arch/x86/x86_32/asm-offsets.c
+++ b/xen/arch/x86/x86_32/asm-offsets.c
@@ -42,6 +42,7 @@ void __dummy__(void)
     OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
     OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
     OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
+    OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
     OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, esp);
     DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
     BLANK();
diff --git a/xen/arch/x86/x86_32/entry.S b/xen/arch/x86/x86_32/entry.S
index 7737904a15..1f6f2ff1e8 100644
--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -288,8 +288,6 @@ test_all_events:
        movw %ax,TRAPBOUNCE_cs(%edx)
        movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call create_bounce_frame
-       movl EDOMAIN_vcpu_info(%ebx),%eax
-       movb $1,VCPUINFO_upcall_mask(%eax)  # Upcalls are masked during delivery
        jmp  test_all_events
 
        ALIGN
@@ -330,14 +328,20 @@ ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl UREGS_esp+4(%esp),%esi
 FLT13: movl UREGS_ss+4(%esp),%gs
 1:     /* Construct a stack frame: EFLAGS, CS/EIP */
+       movb TRAPBOUNCE_flags(%edx),%cl
        subl $12,%esi
        movl UREGS_eip+4(%esp),%eax
 FLT14: movl %eax,%gs:(%esi)
-       movl UREGS_cs+4(%esp),%eax
+       movl EDOMAIN_vcpu_info(%ebx),%eax
+       pushl VCPUINFO_upcall_mask(%eax)
+       testb $TBF_INTERRUPT,%cl
+       setnz VCPUINFO_upcall_mask(%eax)    # TBF_INTERRUPT -> clear upcall mask
+       popl %eax
+       shll $16,%eax                       # Bits 16-23: saved_upcall_mask
+       movw UREGS_cs+4(%esp),%ax           # Bits  0-15: CS
 FLT15: movl %eax,%gs:4(%esi)
        movl UREGS_eflags+4(%esp),%eax
 FLT16: movl %eax,%gs:8(%esi)
-       movb TRAPBOUNCE_flags(%edx),%cl
        test $TBF_EXCEPTION_ERRCODE,%cl
        jz   1f
        subl $4,%esi                        # push error_code onto guest frame
diff --git a/xen/arch/x86/x86_32/seg_fixup.c b/xen/arch/x86/x86_32/seg_fixup.c
index 0a3a17e455..484ff64fd4 100644
--- a/xen/arch/x86/x86_32/seg_fixup.c
+++ b/xen/arch/x86/x86_32/seg_fixup.c
@@ -275,7 +275,7 @@ int gpf_emulate_4gb(struct cpu_user_regs *regs)
     u32 disp32 = 0;
     u8 *eip;         /* ptr to instruction start */
     u8 *pb, b;       /* ptr into instr. / current instr. byte */
-    u32 *pseg = NULL; /* segment for memory operand (NULL=default) */
+    u16 *pseg = NULL; /* segment for memory operand (NULL=default) */
 
     /* WARNING: We only work for ring-3 segments. */
     if ( unlikely(VM86_MODE(regs)) || unlikely(!RING_3(regs)) )
@@ -456,7 +456,7 @@ int gpf_emulate_4gb(struct cpu_user_regs *regs)
         tb->cs = ti->cs;
         tb->eip = ti->address;
         if ( TI_GET_IF(ti) )
-            d->vcpu_info->evtchn_upcall_mask = 1;
+            tb->flags |= TBF_INTERRUPT;
     }
 
     return EXCRET_fault_fixed;
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index c6f3598b25..9d1f784a7b 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -46,6 +46,7 @@ void __dummy__(void)
     OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
     OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
     OFFSET(UREGS_ss, struct cpu_user_regs, ss);
+    OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
     OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
     DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
     BLANK();
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 6f4f23dc84..5c198494ab 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -147,8 +147,6 @@ test_all_events:
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
-       movq  EDOMAIN_vcpu_info(%rbx),%rax
-       movb  $1,VCPUINFO_upcall_mask(%rax) # Upcalls masked during delivery
        jmp   test_all_events
 
 #ifdef CONFIG_VMX
@@ -305,18 +303,24 @@ create_bounce_frame:
        movq  $HYPERVISOR_VIRT_END+60,%rax
        cmpq  %rax,%rsi
        jb    domain_crash_synchronous      # Above Xen private area? Then okay.
-1:     subq  $40,%rsi
+1:     movb  TRAPBOUNCE_flags(%rdx),%cl
+       subq  $40,%rsi
        movq  UREGS_ss+8(%rsp),%rax
 FLT2:  movq  %rax,32(%rsi)                 # SS
        movq  UREGS_rsp+8(%rsp),%rax
 FLT3:  movq  %rax,24(%rsi)                 # RSP
        movq  UREGS_eflags+8(%rsp),%rax
 FLT4:  movq  %rax,16(%rsi)                 # RFLAGS
-       movq  UREGS_cs+8(%rsp),%rax
-FLT5:  movq  %rax,8(%rsi)                  # CS
+       movq  EDOMAIN_vcpu_info(%rbx),%rax
+       pushq VCPUINFO_upcall_mask(%rax)
+       testb $TBF_INTERRUPT,%cl
+       setnz VCPUINFO_upcall_mask(%rax)    # TBF_INTERRUPT -> clear upcall mask
+       popq  %rax
+       shll  $16,%eax                      # Bits 16-23: saved_upcall_mask
+       movw  UREGS_cs+8(%rsp),%ax          # Bits  0-15: CS
+FLT5:  movq  %rax,8(%rsi)                  # CS/saved_upcall_mask
        movq  UREGS_rip+8(%rsp),%rax
 FLT6:  movq  %rax,(%rsi)                   # RIP
-       movb  TRAPBOUNCE_flags(%rdx),%cl
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subq  $8,%rsi
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 853dd95a38..ae9cf424c2 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -12,7 +12,7 @@
 
 void show_registers(struct cpu_user_regs *regs)
 {
-    printk("CPU: %d\nEIP: %04lx:[<%016lx>] \nEFLAGS: %016lx\n",
+    printk("CPU: %d\nEIP: %04x:[<%016lx>] \nEFLAGS: %016lx\n",
            smp_processor_id(), 0xffff & regs->cs, regs->rip, regs->eflags);
     printk("rax: %016lx rbx: %016lx rcx: %016lx rdx: %016lx\n",
            regs->rax, regs->rbx, regs->rcx, regs->rdx);
diff --git a/xen/include/public/arch-x86_32.h b/xen/include/public/arch-x86_32.h
index 8c57f7c787..35d97e2b3d 100644
--- a/xen/include/public/arch-x86_32.h
+++ b/xen/include/public/arch-x86_32.h
@@ -108,14 +108,16 @@ typedef struct cpu_user_regs {
     u16 error_code;    /* private */
     u16 entry_vector;  /* private */
     u32 eip;
-    u32 cs;
+    u16 cs;
+    u8  saved_upcall_mask;
+    u8  _pad0;
     u32 eflags;
     u32 esp;
-    u32 ss;
-    u32 es;
-    u32 ds;
-    u32 fs;
-    u32 gs;
+    u16 ss, _pad1;
+    u16 es, _pad2;
+    u16 ds, _pad3;
+    u16 fs, _pad4;
+    u16 gs, _pad5;
 } cpu_user_regs_t;
 
 typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
diff --git a/xen/include/public/arch-x86_64.h b/xen/include/public/arch-x86_64.h
index 2d68bb291b..8e821688f7 100644
--- a/xen/include/public/arch-x86_64.h
+++ b/xen/include/public/arch-x86_64.h
@@ -147,28 +147,30 @@ typedef struct cpu_user_regs {
     u64 r14;
     u64 r13;
     u64 r12;
-    union { u64 rbp, ebp; } PACKED;
-    union { u64 rbx, ebx; } PACKED;
+    union { u64 rbp, ebp; };
+    union { u64 rbx, ebx; };
     u64 r11;
     u64 r10;
     u64 r9;
     u64 r8;
-    union { u64 rax, eax; } PACKED;
-    union { u64 rcx, ecx; } PACKED;
-    union { u64 rdx, edx; } PACKED;
-    union { u64 rsi, esi; } PACKED;
-    union { u64 rdi, edi; } PACKED;
+    union { u64 rax, eax; };
+    union { u64 rcx, ecx; };
+    union { u64 rdx, edx; };
+    union { u64 rsi, esi; };
+    union { u64 rdi, edi; };
     u32 error_code;    /* private */
     u32 entry_vector;  /* private */
-    union { u64 rip, eip; } PACKED;
-    u64 cs;
-    union { u64 rflags, eflags; } PACKED;
-    union { u64 rsp, esp; } PACKED;
-    u64 ss;
-    u64 es;
-    u64 ds;
-    u64 fs;           /* Non-zero => takes precedence over fs_base. */
-    u64 gs;           /* Non-zero => takes precedence over gs_base_user. */
+    union { u64 rip, eip; };
+    u16 cs;
+    u8  saved_upcall_mask;
+    u8  _pad0[5];
+    union { u64 rflags, eflags; };
+    union { u64 rsp, esp; };
+    u16 ss, _pad1[3];
+    u16 es, _pad2[3];
+    u16 ds, _pad3[3];
+    u16 fs, _pad4[3]; /* Non-zero => takes precedence over fs_base. */
+    u16 gs, _pad5[3]; /* Non-zero => takes precedence over gs_base_user. */
 } cpu_user_regs_t;
 
 typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
-- 
2.30.2
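
How the guest consumes the new field: the i386 restore_all path above compares the
mask saved in the frame (EVENT_MASK / saved_upcall_mask) against the current
evtchn_upcall_mask and re-enables event delivery only if events were unmasked when
the upcall was made. Below is a minimal C sketch of that check. The structure name
bounce_frame_t and the function name are hypothetical and not part of this patch;
only the vcpu_info fields and saved_upcall_mask correspond to the ABI touched above.

#include <stdint.h>

/* Hypothetical, trimmed-down views of the relevant guest-visible fields. */
typedef struct {
    uint8_t evtchn_upcall_pending;  /* an event is waiting to be delivered    */
    uint8_t evtchn_upcall_mask;     /* 1 => event delivery currently blocked  */
} vcpu_info_t;

typedef struct {
    uint16_t cs;                    /* guest code segment (bits 0-15)         */
    uint8_t  saved_upcall_mask;     /* mask at the time Xen made the upcall   */
} bounce_frame_t;                   /* hypothetical name for the saved frame  */

/*
 * Sketch of a guest's return path from an event-upcall activation:
 * re-enable delivery only if it was enabled before the upcall, mirroring the
 * "evtchn_upcall_mask & ~saved_mask" test in the restore_all hunk above.
 */
static void return_from_upcall(vcpu_info_t *vi, const bounce_frame_t *bf)
{
    if ( vi->evtchn_upcall_mask && !bf->saved_upcall_mask )
    {
        vi->evtchn_upcall_mask = 0;         /* unblock event delivery */
        if ( vi->evtchn_upcall_pending )
            ;                               /* re-enter the event handler here */
    }
}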